import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from time import sleep
from os import listdir
import matplotlib.image as mpimg
from matplotlib.animation import FuncAnimation as FA
import random
pd.set_option('display.max_columns', 50)
pd.set_option('display.max_rows', 50)
Setting seeds to ensure reproducibility
# import time
# # Generate a random seed based on the current time
# seed = int(time.time())
# print("Selected seed:", seed)
seed = 111222
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
📌 Loading the data:
Each of the datasets (train and test) contains one degradation trajectory per engine; for FD002, used here, that is 260 engines in the training set and 259 in the test set.
For the train data, each engine starts in some normal condition and is run until failure.
For the test data, each engine is NOT run until failure; the data stop at some earlier point in the engine's life. The number of time cycles the engine would still run before failing is what we call the RUL, or Remaining Useful Life.
Predicting the RUL at the last recorded state of each engine in the test set is our prediction task.
folder_path = './CMAPSSData/'
listdir(folder_path)
file_name = 'FD002.txt'
df_train = pd.read_csv(folder_path + 'train_' + file_name, header = None, sep = ' ')
df_test = pd.read_csv(folder_path + 'test_'+file_name, header = None, sep = ' ')
rul_test = pd.read_csv(folder_path + 'RUL_'+file_name, header = None)
for df in [df_train, df_test, rul_test]:
display(df.head())
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | 34.9983 | 0.8400 | 100.0 | 449.44 | 555.32 | 1358.61 | 1137.23 | 5.48 | 8.00 | 194.64 | 2222.65 | 8341.91 | 1.02 | 42.02 | 183.06 | 2387.72 | 8048.56 | 9.3461 | 0.02 | 334 | 2223 | 100.00 | 14.73 | 8.8071 | NaN | NaN |
| 1 | 1 | 2 | 41.9982 | 0.8408 | 100.0 | 445.00 | 549.90 | 1353.22 | 1125.78 | 3.91 | 5.71 | 138.51 | 2211.57 | 8303.96 | 1.02 | 42.20 | 130.42 | 2387.66 | 8072.30 | 9.3774 | 0.02 | 330 | 2212 | 100.00 | 10.41 | 6.2665 | NaN | NaN |
| 2 | 1 | 3 | 24.9988 | 0.6218 | 60.0 | 462.54 | 537.31 | 1256.76 | 1047.45 | 7.05 | 9.02 | 175.71 | 1915.11 | 8001.42 | 0.94 | 36.69 | 164.22 | 2028.03 | 7864.87 | 10.8941 | 0.02 | 309 | 1915 | 84.93 | 14.08 | 8.6723 | NaN | NaN |
| 3 | 1 | 4 | 42.0077 | 0.8416 | 100.0 | 445.00 | 549.51 | 1354.03 | 1126.38 | 3.91 | 5.71 | 138.46 | 2211.58 | 8303.96 | 1.02 | 41.96 | 130.72 | 2387.61 | 8068.66 | 9.3528 | 0.02 | 329 | 2212 | 100.00 | 10.59 | 6.4701 | NaN | NaN |
| 4 | 1 | 5 | 25.0005 | 0.6203 | 60.0 | 462.54 | 537.07 | 1257.71 | 1047.93 | 7.05 | 9.03 | 175.05 | 1915.10 | 7993.23 | 0.94 | 36.89 | 164.31 | 2028.00 | 7861.23 | 10.8963 | 0.02 | 309 | 1915 | 84.93 | 14.13 | 8.5286 | NaN | NaN |
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | 9.9987 | 0.2502 | 100.0 | 489.05 | 605.03 | 1497.17 | 1304.99 | 10.52 | 15.49 | 394.54 | 2318.96 | 8763.80 | 1.26 | 45.61 | 371.69 | 2388.18 | 8114.10 | 8.6476 | 0.03 | 369 | 2319 | 100.00 | 28.42 | 17.1551 | NaN | NaN |
| 1 | 1 | 2 | 20.0026 | 0.7000 | 100.0 | 491.19 | 607.82 | 1481.20 | 1246.11 | 9.35 | 13.66 | 334.36 | 2323.95 | 8713.21 | 1.08 | 44.26 | 315.32 | 2388.12 | 8053.06 | 9.2405 | 0.02 | 364 | 2324 | 100.00 | 24.29 | 14.8039 | NaN | NaN |
| 2 | 1 | 3 | 35.0045 | 0.8400 | 100.0 | 449.44 | 556.00 | 1359.08 | 1128.36 | 5.48 | 8.00 | 193.55 | 2222.67 | 8340.20 | 1.02 | 41.80 | 183.04 | 2387.75 | 8053.04 | 9.3472 | 0.02 | 333 | 2223 | 100.00 | 14.98 | 8.9125 | NaN | NaN |
| 3 | 1 | 4 | 42.0066 | 0.8410 | 100.0 | 445.00 | 550.17 | 1349.69 | 1127.89 | 3.91 | 5.71 | 138.74 | 2211.58 | 8313.85 | 1.02 | 42.21 | 130.40 | 2387.72 | 8066.90 | 9.3961 | 0.02 | 332 | 2212 | 100.00 | 10.35 | 6.4181 | NaN | NaN |
| 4 | 1 | 5 | 24.9985 | 0.6213 | 60.0 | 462.54 | 536.72 | 1253.18 | 1050.69 | 7.05 | 9.03 | 175.75 | 1915.10 | 7997.13 | 0.94 | 36.76 | 164.56 | 2028.05 | 7865.66 | 10.8682 | 0.02 | 305 | 1915 | 84.93 | 14.31 | 8.5740 | NaN | NaN |
| 0 | |
|---|---|
| 0 | 18 |
| 1 | 79 |
| 2 | 106 |
| 3 | 110 |
| 4 | 15 |
📌 Attaching column names: we have three operational-setting columns (os1–os3) and 21 sensor columns (s1–s21). The last two columns, which contain only NaNs, are dropped.
col_names = []
col_names.append('unit')
col_names.append('time')
for i in range(1,4):
col_names.append('os'+str(i))
for i in range(1,22):
col_names.append('s'+str(i))
df_train = df_train.iloc[:,:-2].copy()
df_train.columns = col_names
display(df_train.head())
df_test = df_test.iloc[:,:-2].copy()
df_test.columns = col_names
display(df_test.head())
| unit | time | os1 | os2 | os3 | s1 | s2 | s3 | s4 | s5 | s6 | s7 | s8 | s9 | s10 | s11 | s12 | s13 | s14 | s15 | s16 | s17 | s18 | s19 | s20 | s21 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | 34.9983 | 0.8400 | 100.0 | 449.44 | 555.32 | 1358.61 | 1137.23 | 5.48 | 8.00 | 194.64 | 2222.65 | 8341.91 | 1.02 | 42.02 | 183.06 | 2387.72 | 8048.56 | 9.3461 | 0.02 | 334 | 2223 | 100.00 | 14.73 | 8.8071 |
| 1 | 1 | 2 | 41.9982 | 0.8408 | 100.0 | 445.00 | 549.90 | 1353.22 | 1125.78 | 3.91 | 5.71 | 138.51 | 2211.57 | 8303.96 | 1.02 | 42.20 | 130.42 | 2387.66 | 8072.30 | 9.3774 | 0.02 | 330 | 2212 | 100.00 | 10.41 | 6.2665 |
| 2 | 1 | 3 | 24.9988 | 0.6218 | 60.0 | 462.54 | 537.31 | 1256.76 | 1047.45 | 7.05 | 9.02 | 175.71 | 1915.11 | 8001.42 | 0.94 | 36.69 | 164.22 | 2028.03 | 7864.87 | 10.8941 | 0.02 | 309 | 1915 | 84.93 | 14.08 | 8.6723 |
| 3 | 1 | 4 | 42.0077 | 0.8416 | 100.0 | 445.00 | 549.51 | 1354.03 | 1126.38 | 3.91 | 5.71 | 138.46 | 2211.58 | 8303.96 | 1.02 | 41.96 | 130.72 | 2387.61 | 8068.66 | 9.3528 | 0.02 | 329 | 2212 | 100.00 | 10.59 | 6.4701 |
| 4 | 1 | 5 | 25.0005 | 0.6203 | 60.0 | 462.54 | 537.07 | 1257.71 | 1047.93 | 7.05 | 9.03 | 175.05 | 1915.10 | 7993.23 | 0.94 | 36.89 | 164.31 | 2028.00 | 7861.23 | 10.8963 | 0.02 | 309 | 1915 | 84.93 | 14.13 | 8.5286 |
| unit | time | os1 | os2 | os3 | s1 | s2 | s3 | s4 | s5 | s6 | s7 | s8 | s9 | s10 | s11 | s12 | s13 | s14 | s15 | s16 | s17 | s18 | s19 | s20 | s21 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | 9.9987 | 0.2502 | 100.0 | 489.05 | 605.03 | 1497.17 | 1304.99 | 10.52 | 15.49 | 394.54 | 2318.96 | 8763.80 | 1.26 | 45.61 | 371.69 | 2388.18 | 8114.10 | 8.6476 | 0.03 | 369 | 2319 | 100.00 | 28.42 | 17.1551 |
| 1 | 1 | 2 | 20.0026 | 0.7000 | 100.0 | 491.19 | 607.82 | 1481.20 | 1246.11 | 9.35 | 13.66 | 334.36 | 2323.95 | 8713.21 | 1.08 | 44.26 | 315.32 | 2388.12 | 8053.06 | 9.2405 | 0.02 | 364 | 2324 | 100.00 | 24.29 | 14.8039 |
| 2 | 1 | 3 | 35.0045 | 0.8400 | 100.0 | 449.44 | 556.00 | 1359.08 | 1128.36 | 5.48 | 8.00 | 193.55 | 2222.67 | 8340.20 | 1.02 | 41.80 | 183.04 | 2387.75 | 8053.04 | 9.3472 | 0.02 | 333 | 2223 | 100.00 | 14.98 | 8.9125 |
| 3 | 1 | 4 | 42.0066 | 0.8410 | 100.0 | 445.00 | 550.17 | 1349.69 | 1127.89 | 3.91 | 5.71 | 138.74 | 2211.58 | 8313.85 | 1.02 | 42.21 | 130.40 | 2387.72 | 8066.90 | 9.3961 | 0.02 | 332 | 2212 | 100.00 | 10.35 | 6.4181 |
| 4 | 1 | 5 | 24.9985 | 0.6213 | 60.0 | 462.54 | 536.72 | 1253.18 | 1050.69 | 7.05 | 9.03 | 175.75 | 1915.10 | 7997.13 | 0.94 | 36.76 | 164.56 | 2028.05 | 7865.66 | 10.8682 | 0.02 | 305 | 1915 | 84.93 | 14.31 | 8.5740 |
📌 Attaching RUL(remaining useful lifetime) values to the datasets.
For the train data, the RUL values are not given explicitly, but the dataset documentation specifies that every training engine was run to failure. Thus, for example, if we had five rows for a specific unit, say unit 7:
| unit | time cycle |
|---|---|
| 7 | 1 |
| 7 | 2 |
| 7 | 3 |
| 7 | 4 |
| 7 | 5 |
Then we know that the last row is when the RUL value becomes 0 (failure), so the RUL for this unit would be attached in this way:
| unit | time cycle | RUL |
|---|---|---|
| 7 | 1 | 4 |
| 7 | 2 | 3 |
| 7 | 3 | 2 |
| 7 | 4 | 1 |
| 7 | 5 | 0 |
For the test data, the 'solutions' for the test engines are provided in a separate file, which I loaded into the rul_test dataframe. I will use it to attach RUL values to the test set as well.
units_training = max(df_train['unit'])
units_testing = max(df_test['unit'])
print(f'Units in the training dataset: {units_training}')
print(f'Units in the testing dataset: {units_testing}')
# Unit 260 appears only in the training set; drop it so train and test cover the same unit numbers
df_train = df_train[df_train['unit'] != 260]
if units_training > units_testing:
df_train = df_train[df_train['unit'] <= units_testing]
elif units_training < units_testing:
df_test = df_test[df_test['unit'] <= units_training]
assert max(df_train['unit']) == max(df_test['unit'])
no_units = max(df_train['unit'])
print(f'No of units in training and testing dataset after: {no_units}')
Units in the training dataset: 260
Units in the testing dataset: 259
No of units in training and testing dataset after: 259
MAX_RUL = 125
no_units = min(rul_test.shape[0],max(df_train['unit']))
print(f'units :{no_units}')
units :259
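Concretely, the two loops below build a piecewise-linear RUL target: the number of cycles remaining, capped at MAX_RUL so that all early, healthy cycles share the same label. With $T_u$ the number of observed cycles of unit $u$ and $\mathrm{RUL}^{final}_u$ the value from the RUL file:
$$\mathrm{RUL}^{train}_{u,t} = \min\big(\mathrm{MAX\_RUL},\; T_u - t\big), \qquad \mathrm{RUL}^{test}_{u,t} = \min\big(\mathrm{MAX\_RUL},\; T_u - t + \mathrm{RUL}^{final}_u\big)$$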
rul_list = []
engine_numbers = no_units
for n in np.arange(1,engine_numbers+1):
time_list = np.array(df_train[df_train['unit'] == n]['time'])
length = len(time_list)
rul = list(length - time_list)
rul = [min(MAX_RUL,x) for x in rul]
rul_list += rul
df_train['rul'] = rul_list
rul_list = []
for n in np.arange(1,engine_numbers+1):
time_list = np.array(df_test[df_test['unit'] == n]['time'])
length = len(time_list)
rul_val = rul_test.iloc[n-1].item()
rul = list(length - time_list + rul_val)
rul = [min(MAX_RUL,x) for x in rul]
rul_list += rul
df_test['rul'] = rul_list
for df in [df_train, df_test]:
display(df.head())
| unit | time | os1 | os2 | os3 | s1 | s2 | s3 | s4 | s5 | s6 | s7 | s8 | s9 | s10 | s11 | s12 | s13 | s14 | s15 | s16 | s17 | s18 | s19 | s20 | s21 | rul | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | 34.9983 | 0.8400 | 100.0 | 449.44 | 555.32 | 1358.61 | 1137.23 | 5.48 | 8.00 | 194.64 | 2222.65 | 8341.91 | 1.02 | 42.02 | 183.06 | 2387.72 | 8048.56 | 9.3461 | 0.02 | 334 | 2223 | 100.00 | 14.73 | 8.8071 | 125 |
| 1 | 1 | 2 | 41.9982 | 0.8408 | 100.0 | 445.00 | 549.90 | 1353.22 | 1125.78 | 3.91 | 5.71 | 138.51 | 2211.57 | 8303.96 | 1.02 | 42.20 | 130.42 | 2387.66 | 8072.30 | 9.3774 | 0.02 | 330 | 2212 | 100.00 | 10.41 | 6.2665 | 125 |
| 2 | 1 | 3 | 24.9988 | 0.6218 | 60.0 | 462.54 | 537.31 | 1256.76 | 1047.45 | 7.05 | 9.02 | 175.71 | 1915.11 | 8001.42 | 0.94 | 36.69 | 164.22 | 2028.03 | 7864.87 | 10.8941 | 0.02 | 309 | 1915 | 84.93 | 14.08 | 8.6723 | 125 |
| 3 | 1 | 4 | 42.0077 | 0.8416 | 100.0 | 445.00 | 549.51 | 1354.03 | 1126.38 | 3.91 | 5.71 | 138.46 | 2211.58 | 8303.96 | 1.02 | 41.96 | 130.72 | 2387.61 | 8068.66 | 9.3528 | 0.02 | 329 | 2212 | 100.00 | 10.59 | 6.4701 | 125 |
| 4 | 1 | 5 | 25.0005 | 0.6203 | 60.0 | 462.54 | 537.07 | 1257.71 | 1047.93 | 7.05 | 9.03 | 175.05 | 1915.10 | 7993.23 | 0.94 | 36.89 | 164.31 | 2028.00 | 7861.23 | 10.8963 | 0.02 | 309 | 1915 | 84.93 | 14.13 | 8.5286 | 125 |
| unit | time | os1 | os2 | os3 | s1 | s2 | s3 | s4 | s5 | s6 | s7 | s8 | s9 | s10 | s11 | s12 | s13 | s14 | s15 | s16 | s17 | s18 | s19 | s20 | s21 | rul | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | 9.9987 | 0.2502 | 100.0 | 489.05 | 605.03 | 1497.17 | 1304.99 | 10.52 | 15.49 | 394.54 | 2318.96 | 8763.80 | 1.26 | 45.61 | 371.69 | 2388.18 | 8114.10 | 8.6476 | 0.03 | 369 | 2319 | 100.00 | 28.42 | 17.1551 | 125 |
| 1 | 1 | 2 | 20.0026 | 0.7000 | 100.0 | 491.19 | 607.82 | 1481.20 | 1246.11 | 9.35 | 13.66 | 334.36 | 2323.95 | 8713.21 | 1.08 | 44.26 | 315.32 | 2388.12 | 8053.06 | 9.2405 | 0.02 | 364 | 2324 | 100.00 | 24.29 | 14.8039 | 125 |
| 2 | 1 | 3 | 35.0045 | 0.8400 | 100.0 | 449.44 | 556.00 | 1359.08 | 1128.36 | 5.48 | 8.00 | 193.55 | 2222.67 | 8340.20 | 1.02 | 41.80 | 183.04 | 2387.75 | 8053.04 | 9.3472 | 0.02 | 333 | 2223 | 100.00 | 14.98 | 8.9125 | 125 |
| 3 | 1 | 4 | 42.0066 | 0.8410 | 100.0 | 445.00 | 550.17 | 1349.69 | 1127.89 | 3.91 | 5.71 | 138.74 | 2211.58 | 8313.85 | 1.02 | 42.21 | 130.40 | 2387.72 | 8066.90 | 9.3961 | 0.02 | 332 | 2212 | 100.00 | 10.35 | 6.4181 | 125 |
| 4 | 1 | 5 | 24.9985 | 0.6213 | 60.0 | 462.54 | 536.72 | 1253.18 | 1050.69 | 7.05 | 9.03 | 175.75 | 1915.10 | 7997.13 | 0.94 | 36.76 | 164.56 | 2028.05 | 7865.66 | 10.8682 | 0.02 | 305 | 1915 | 84.93 | 14.31 | 8.5740 | 125 |
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
# Initialize KMeans and StandardScaler
kmeans = KMeans(n_clusters=6, random_state=42)
scaler = StandardScaler()
# Apply clustering for specific datasets
if file_name in ['FD002.txt', 'FD004.txt']:
# Ensure required columns are present
if {'os1', 'os2', 'os3'}.issubset(df_train.columns):
# Select operating conditions and scale them
operating_conditions = df_train[['os1', 'os2', 'os3']]
scaled_conditions = scaler.fit_transform(operating_conditions)
# Apply k-means clustering and assign operation_mode
df_train['operation_mode'] = kmeans.fit_predict(scaled_conditions)
else:
raise ValueError("Columns 'os1', 'os2', and 'os3' are missing in the dataset!")
else:
print(f"No clustering applied for dataset {file_name}.")
if file_name in ['FD002.txt', 'FD004.txt']:
df_test['operation_mode'] = kmeans.predict(scaler.transform(df_test[['os1', 'os2', 'os3']]))
for df in [df_train, df_test]:
display(df.head())
| unit | time | os1 | os2 | os3 | s1 | s2 | s3 | s4 | s5 | s6 | s7 | s8 | s9 | s10 | s11 | s12 | s13 | s14 | s15 | s16 | s17 | s18 | s19 | s20 | s21 | rul | operation_mode | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | 34.9983 | 0.8400 | 100.0 | 449.44 | 555.32 | 1358.61 | 1137.23 | 5.48 | 8.00 | 194.64 | 2222.65 | 8341.91 | 1.02 | 42.02 | 183.06 | 2387.72 | 8048.56 | 9.3461 | 0.02 | 334 | 2223 | 100.00 | 14.73 | 8.8071 | 125 | 0 |
| 1 | 1 | 2 | 41.9982 | 0.8408 | 100.0 | 445.00 | 549.90 | 1353.22 | 1125.78 | 3.91 | 5.71 | 138.51 | 2211.57 | 8303.96 | 1.02 | 42.20 | 130.42 | 2387.66 | 8072.30 | 9.3774 | 0.02 | 330 | 2212 | 100.00 | 10.41 | 6.2665 | 125 | 5 |
| 2 | 1 | 3 | 24.9988 | 0.6218 | 60.0 | 462.54 | 537.31 | 1256.76 | 1047.45 | 7.05 | 9.02 | 175.71 | 1915.11 | 8001.42 | 0.94 | 36.69 | 164.22 | 2028.03 | 7864.87 | 10.8941 | 0.02 | 309 | 1915 | 84.93 | 14.08 | 8.6723 | 125 | 1 |
| 3 | 1 | 4 | 42.0077 | 0.8416 | 100.0 | 445.00 | 549.51 | 1354.03 | 1126.38 | 3.91 | 5.71 | 138.46 | 2211.58 | 8303.96 | 1.02 | 41.96 | 130.72 | 2387.61 | 8068.66 | 9.3528 | 0.02 | 329 | 2212 | 100.00 | 10.59 | 6.4701 | 125 | 5 |
| 4 | 1 | 5 | 25.0005 | 0.6203 | 60.0 | 462.54 | 537.07 | 1257.71 | 1047.93 | 7.05 | 9.03 | 175.05 | 1915.10 | 7993.23 | 0.94 | 36.89 | 164.31 | 2028.00 | 7861.23 | 10.8963 | 0.02 | 309 | 1915 | 84.93 | 14.13 | 8.5286 | 125 | 1 |
| unit | time | os1 | os2 | os3 | s1 | s2 | s3 | s4 | s5 | s6 | s7 | s8 | s9 | s10 | s11 | s12 | s13 | s14 | s15 | s16 | s17 | s18 | s19 | s20 | s21 | rul | operation_mode | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | 9.9987 | 0.2502 | 100.0 | 489.05 | 605.03 | 1497.17 | 1304.99 | 10.52 | 15.49 | 394.54 | 2318.96 | 8763.80 | 1.26 | 45.61 | 371.69 | 2388.18 | 8114.10 | 8.6476 | 0.03 | 369 | 2319 | 100.00 | 28.42 | 17.1551 | 125 | 4 |
| 1 | 1 | 2 | 20.0026 | 0.7000 | 100.0 | 491.19 | 607.82 | 1481.20 | 1246.11 | 9.35 | 13.66 | 334.36 | 2323.95 | 8713.21 | 1.08 | 44.26 | 315.32 | 2388.12 | 8053.06 | 9.2405 | 0.02 | 364 | 2324 | 100.00 | 24.29 | 14.8039 | 125 | 3 |
| 2 | 1 | 3 | 35.0045 | 0.8400 | 100.0 | 449.44 | 556.00 | 1359.08 | 1128.36 | 5.48 | 8.00 | 193.55 | 2222.67 | 8340.20 | 1.02 | 41.80 | 183.04 | 2387.75 | 8053.04 | 9.3472 | 0.02 | 333 | 2223 | 100.00 | 14.98 | 8.9125 | 125 | 0 |
| 3 | 1 | 4 | 42.0066 | 0.8410 | 100.0 | 445.00 | 550.17 | 1349.69 | 1127.89 | 3.91 | 5.71 | 138.74 | 2211.58 | 8313.85 | 1.02 | 42.21 | 130.40 | 2387.72 | 8066.90 | 9.3961 | 0.02 | 332 | 2212 | 100.00 | 10.35 | 6.4181 | 125 | 5 |
| 4 | 1 | 5 | 24.9985 | 0.6213 | 60.0 | 462.54 | 536.72 | 1253.18 | 1050.69 | 7.05 | 9.03 | 175.75 | 1915.10 | 7997.13 | 0.94 | 36.76 | 164.56 | 2028.05 | 7865.66 | 10.8682 | 0.02 | 305 | 1915 | 84.93 | 14.31 | 8.5740 | 125 | 1 |
Standard (z-score) normalization: the data is scaled to have a mean of 0 and a standard deviation of 1, using statistics computed on the training set.
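In equation form (a summary of the code below, which adds a small constant for numerical stability): for FD001/FD003 each sensor column is scaled with the global training-set mean and standard deviation, while for FD002/FD004 the statistics are computed separately within each operating mode $m$ found by the clustering above:
$$z = \frac{x - \mu_{train}}{\sigma_{train} + 10^{-6}}, \qquad z^{(m)} = \frac{x - \mu^{(m)}_{train}}{\sigma^{(m)}_{train} + 10^{-6}}$$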
sensor_colums = ['s'+str(i) for i in range(1,22)]
print(sensor_colums)
['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14', 's15', 's16', 's17', 's18', 's19', 's20', 's21']
## Z-score Normalization
if file_name in ['FD001.txt', 'FD003.txt']:
# Dictionary to store mean and std for columns containing 's'
mean_std_dict = {}
# Calculate mean and std for 's' columns in df_train
for c in sensor_colums:
mean_std_dict[c + '_mean'] = df_train[c].mean()
mean_std_dict[c + '_std'] = df_train[c].std()
# Apply standard normalization to df_train
for c in sensor_colums:
df_train[c] = (df_train[c] - mean_std_dict[c + '_mean']) / (mean_std_dict[c + '_std'] + 1e-6)
# Apply standard normalization to df_test using df_train's mean and std
for c in sensor_colums:
df_test[c] = (df_test[c] - mean_std_dict[c + '_mean']) / (mean_std_dict[c + '_std'] + 1e-6)
if file_name in ['FD002.txt', 'FD004.txt']:
# Function to normalize each group based on its operation_mode
def z_score_normalize_by_mode(group):
group = group.copy() # To avoid modifying the original data
for c in sensor_colums:
group[c] = (group[c] - group[c].mean()) / (group[c].std() + 1e-6)
return group
# Normalize training data
df_train_normalized = df_train.groupby('operation_mode', group_keys=False).apply(z_score_normalize_by_mode)
# Normalize test data using train stats for each operation_mode
for mode in df_train['operation_mode'].unique():
train_group = df_train[df_train['operation_mode'] == mode]
test_group = df_test[df_test['operation_mode'] == mode]
for c in sensor_colums:
# Ensure the column dtype is compatible
df_test[c] = df_test[c].astype(float)
mean = train_group[c].mean()
std = train_group[c].std()
df_test.loc[df_test['operation_mode'] == mode, c] = (
df_test.loc[df_test['operation_mode'] == mode, c] - mean
) / (std+1e-6)
df_train = df_train_normalized.copy()
C:\Users\spiro\AppData\Local\Temp\ipykernel_14036\182794737.py:10: DeprecationWarning: DataFrameGroupBy.apply operated on the grouping columns. This behavior is deprecated, and in a future version of pandas the grouping columns will be excluded from the operation. Either pass `include_groups=False` to exclude the groupings or explicitly select the grouping columns after groupby to silence this warning.
df_train_normalized = df_train.groupby('operation_mode', group_keys=False).apply(z_score_normalize_by_mode)
if file_name in ['FD002.txt', 'FD004.txt']:
df_train = df_train.drop('operation_mode', axis = 1)
df_test = df_test.drop('operation_mode', axis = 1)
os_colums = ['os'+str(i) for i in range(1,4)]
print(os_colums)
mean_std_dict = {}
# Calculate mean and std for the 'os' columns in df_train
for c in os_colums:
mean_std_dict[c + '_mean'] = df_train[c].mean()
mean_std_dict[c + '_std'] = df_train[c].std()
# Apply standard normalization to df_train
for c in os_colums:
df_train[c] = (df_train[c] - mean_std_dict[c + '_mean']) / (mean_std_dict[c + '_std'] + 1e-6)
# Apply standard normalization to df_test using df_train's mean and std
for c in os_colums:
df_test[c] = (df_test[c] - mean_std_dict[c + '_mean']) / (mean_std_dict[c + '_std'] + 1e-6)
# Display the first few rows of both datasets
for df in [df_train, df_test]:
display(df.head())
['os1', 'os2', 'os3']
| unit | time | os1 | os2 | os3 | s1 | s2 | s3 | s4 | s5 | s6 | s7 | s8 | s9 | s10 | s11 | s12 | s13 | s14 | s15 | s16 | s17 | s18 | s19 | s20 | s21 | rul | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | 0.745835 | 0.864179 | 0.418061 | 1.136868e-07 | -1.091041 | -1.442268 | 0.767886 | -8.881784e-10 | -0.172002 | 0.414295 | -1.409091 | -0.837981 | -0.201077 | 0.161557 | 0.099799 | -1.403971 | -1.540288 | 0.358858 | 0.000000e+00 | -0.167936 | 0.0 | 0.000000e+00 | -0.889803 | -1.350077 | 125 |
| 1 | 1 | 2 | 1.220498 | 0.866760 | 0.418061 | 0.000000e+00 | 0.448386 | -0.221928 | -0.266703 | 1.332268e-09 | -1.184054 | -0.272014 | -1.344538 | -1.334196 | -0.155178 | 0.211507 | -0.377958 | -1.216867 | -1.043857 | 0.059079 | 0.000000e+00 | -0.758976 | 0.0 | 0.000000e+00 | -1.631696 | -1.314162 | 125 |
| 2 | 1 | 3 | 0.067770 | 0.160340 | -2.391952 | -5.684342e-08 | 1.242838 | -1.125400 | -0.424638 | 8.881784e-10 | -1.858697 | 0.637158 | -1.177852 | -1.007012 | 0.088545 | -0.536546 | -1.023371 | -1.034022 | -1.052041 | -0.462180 | 3.469447e-12 | 1.278396 | 0.0 | -1.421085e-08 | -1.628991 | 1.721647 | 125 |
| 3 | 1 | 4 | 1.221142 | 0.869340 | 0.418061 | 0.000000e+00 | -0.452125 | -0.078114 | -0.188162 | 1.332268e-09 | -1.184054 | -0.385907 | -1.310909 | -1.334196 | -0.155178 | -0.793303 | 0.502730 | -1.372719 | -1.266719 | -0.596009 | 0.000000e+00 | -1.469067 | 0.0 | 0.000000e+00 | 0.034683 | 1.847031 | 125 |
| 4 | 1 | 5 | 0.067885 | 0.155501 | -2.391952 | -5.684342e-08 | 0.572215 | -0.945363 | -0.354104 | 8.881784e-10 | 0.536134 | -0.836580 | -1.216606 | -1.596519 | 0.088545 | 0.422942 | -0.761611 | -1.143910 | -1.335676 | -0.411166 | 3.469447e-12 | 1.278396 | 0.0 | -1.421085e-08 | -1.183058 | -0.450567 | 125 |
| unit | time | os1 | os2 | os3 | s1 | s2 | s3 | s4 | s5 | s6 | s7 | s8 | s9 | s10 | s11 | s12 | s13 | s14 | s15 | s16 | s17 | s18 | s19 | s20 | s21 | rul | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | -0.949386 | -1.038316 | 0.418061 | -1.136868e-07 | 0.242928 | -0.838184 | -0.743908 | 0.000000e+00 | -0.700137 | 0.322429 | 0.013938 | -1.192453 | 0.019211 | 0.481647 | 0.459288 | 0.181946 | -1.319893 | -0.448464 | -1.040834e-11 | -0.471416 | 0.0 | 0.000000e+00 | -0.736388 | 0.459398 | 125 |
| 1 | 1 | 2 | -0.271022 | 0.412587 | 0.418061 | 1.136868e-07 | 0.549340 | -0.759980 | -0.901197 | 1.776357e-09 | 0.711186 | -0.223175 | -0.566626 | -0.966775 | 0.541651 | -0.793310 | 0.899749 | -0.263669 | -0.851829 | 0.197927 | -5.228824e-01 | -0.943899 | 0.0 | 0.000000e+00 | -1.213522 | 1.644883 | 125 |
| 2 | 1 | 3 | 0.746255 | 0.864179 | 0.418061 | 1.136868e-07 | 0.456540 | -1.358632 | -0.430256 | -8.881784e-10 | -0.172002 | -1.873362 | -1.336677 | -0.939530 | -0.201077 | -0.794553 | 0.046613 | -1.302880 | -1.247410 | 0.387682 | 0.000000e+00 | -0.880128 | 0.0 | 0.000000e+00 | 1.319888 | 0.219312 | 125 |
| 3 | 1 | 4 | 1.221067 | 0.867405 | 0.418061 | 0.000000e+00 | 1.071817 | -0.848672 | 0.009502 | 1.332268e-09 | -1.184054 | 0.251898 | -1.310909 | -0.780769 | -0.155178 | 0.253375 | -0.436671 | -1.029844 | -1.374477 | 0.557052 | 0.000000e+00 | 0.661205 | 0.0 | 0.000000e+00 | -2.187156 | 1.039654 | 125 |
| 4 | 1 | 5 | 0.067749 | 0.158727 | -2.391952 | -5.684342e-08 | -0.405778 | -1.803852 | 0.051462 | 8.881784e-10 | 0.536134 | 0.726475 | -1.216606 | -1.315802 | 0.088545 | -0.200725 | -0.034498 | -0.960764 | -0.990483 | -1.062757 | 3.469447e-12 | -1.821018 | 0.0 | -1.421085e-08 | 0.422298 | 0.235714 | 125 |
Below, we can see an example of all the sensor values of a specific engine in the training set (unit 4), as the engine progresses toward failure.
sample = 4
sample_df = df_train[df_train['unit'] == sample].copy()
# Select only sensor columns
n_sensors = len(sensor_colums)
rows = (n_sensors + 2) // 3 # Calculate rows for the grid
# Create subplots
fig, axes = plt.subplots(rows, 3, figsize=(15, 5 * rows))
# Flatten axes for easier iteration
axes = axes.flatten()
# Plot each sensor's data
for j, c in enumerate(sensor_colums):
axes[j].plot(sample_df['time'], sample_df[c], label='Sensor data')
axes[j].set_title(c)
# axes[j].set_xlabel('Time')
# axes[j].set_ylabel('Value')
axes[j].legend()
# Hide unused subplots
for ax in axes[n_sensors:]:
ax.axis('off')
# Adjust layout for better spacing
fig.tight_layout(pad=3.0)
plt.savefig(f'figures\\sensor_of_engine_{sample}_for_dataset_{file_name}.png', dpi=300, bbox_inches='tight')
plt.show()
We plot the data from sensor 2 for five of the engines to show the general trend.
sensor = 's2'
for sample in range(1,6):
sample_df = df_train[df_train['unit'] == sample].copy()
sensordata = sample_df[sensor].to_numpy()
plt.plot(sensordata, label = "engine "+str(sample))
plt.grid()
plt.legend()
plt.ylabel('Sensor 2')
plt.xlabel('Cycle')
plt.savefig(f'figures\\sensor_{sensor}_of_engines_for_dataset_{file_name}.png', dpi=300, bbox_inches='tight')
plt.show()
import seaborn as sns
import matplotlib.pyplot as plt
# Compute correlation matrix
corr_matrix = df_train.corr()
# Plot heatmap
plt.figure(figsize=(20, 14))
sns.heatmap(corr_matrix, annot=True, cmap="coolwarm", fmt=".2f")
plt.title("Feature Correlation Matrix")
plt.savefig(f'figures\\Feature_Correlation_Matrix_for_dataset_{file_name}.png', dpi=300, bbox_inches='tight')
plt.show()
from sklearn.ensemble import RandomForestRegressor
import pandas as pd
Xt = df_train.iloc[:,2:-1]
yt = df_train.iloc[:,-1]
rf = RandomForestRegressor()
rf.fit(Xt, yt)
# Get feature importance
importance = pd.Series(rf.feature_importances_, index=Xt.columns)
importance = importance.sort_values(ascending=False)
print("Feature Importances:\n", importance)
Feature Importances:
s11    0.574699
s9     0.096874
s4     0.085477
s15    0.048489
s2     0.020919
s14    0.020525
s3     0.018375
s12    0.017579
s7     0.016750
s21    0.015891
s20    0.014987
s13    0.013842
s17    0.013637
s8     0.012789
os1    0.011209
os2    0.006768
s6     0.005434
s10    0.003202
s16    0.000954
s1     0.000842
os3    0.000760
s5     0.000000
s18    0.000000
s19    0.000000
dtype: float64
# Combine thresholding and sorting
threshold = 1e-3
low_importance_features = importance[importance < threshold].sort_values()
print("Low importance features sorted:\n", low_importance_features)
Low importance features sorted:
s5     0.000000
s18    0.000000
s19    0.000000
os3    0.000760
s1     0.000842
s16    0.000954
dtype: float64
We notice that several of these features barely change at all, so they are probably not useful for prediction. Do they behave the same way across the other engine units as well? The summary below shows that most of them do: their standard deviation is 0 or practically 0 (os3 and s16 do vary, but only between a handful of discrete values).
df_train[low_importance_features.index].describe()
| s5 | s18 | s19 | os3 | s1 | s16 | |
|---|---|---|---|---|---|---|
| count | 5.344300e+04 | 53443.0 | 5.344300e+04 | 5.344300e+04 | 5.344300e+04 | 5.344300e+04 |
| mean | 6.970081e-11 | 0.0 | -2.114225e-09 | 2.566000e-16 | -8.397335e-09 | -3.127277e-12 |
| std | 1.743615e-09 | 0.0 | 5.057220e-09 | 9.999999e-01 | 9.042583e-08 | 3.886432e-01 |
| min | -3.552714e-09 | 0.0 | -1.421085e-08 | -2.391952e+00 | -1.136868e-07 | -5.228824e-01 |
| 25% | -8.881784e-10 | 0.0 | 0.000000e+00 | 4.180606e-01 | -1.136868e-07 | -1.387779e-11 |
| 50% | 8.881784e-10 | 0.0 | 0.000000e+00 | 4.180606e-01 | 0.000000e+00 | 0.000000e+00 |
| 75% | 1.332268e-09 | 0.0 | 0.000000e+00 | 4.180606e-01 | 1.136868e-07 | 0.000000e+00 |
| max | 1.776357e-09 | 0.0 | 0.000000e+00 | 4.180606e-01 | 1.136868e-07 | 1.911308e+00 |
📌 The previously identified low-importance columns are dropped.
# Drop the low-importance features (s5, s18, s19, os3, s1, s16) from both train and test
drop_cols1 = low_importance_features.index
df_train = df_train.drop(drop_cols1, axis = 1)
df_test = df_test.drop(drop_cols1, axis = 1)
for df in [df_train, df_test]:
display(df.head())
| unit | time | os1 | os2 | s2 | s3 | s4 | s6 | s7 | s8 | s9 | s10 | s11 | s12 | s13 | s14 | s15 | s17 | s20 | s21 | rul | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | 0.745835 | 0.864179 | -1.091041 | -1.442268 | 0.767886 | -0.172002 | 0.414295 | -1.409091 | -0.837981 | -0.201077 | 0.161557 | 0.099799 | -1.403971 | -1.540288 | 0.358858 | -0.167936 | -0.889803 | -1.350077 | 125 |
| 1 | 1 | 2 | 1.220498 | 0.866760 | 0.448386 | -0.221928 | -0.266703 | -1.184054 | -0.272014 | -1.344538 | -1.334196 | -0.155178 | 0.211507 | -0.377958 | -1.216867 | -1.043857 | 0.059079 | -0.758976 | -1.631696 | -1.314162 | 125 |
| 2 | 1 | 3 | 0.067770 | 0.160340 | 1.242838 | -1.125400 | -0.424638 | -1.858697 | 0.637158 | -1.177852 | -1.007012 | 0.088545 | -0.536546 | -1.023371 | -1.034022 | -1.052041 | -0.462180 | 1.278396 | -1.628991 | 1.721647 | 125 |
| 3 | 1 | 4 | 1.221142 | 0.869340 | -0.452125 | -0.078114 | -0.188162 | -1.184054 | -0.385907 | -1.310909 | -1.334196 | -0.155178 | -0.793303 | 0.502730 | -1.372719 | -1.266719 | -0.596009 | -1.469067 | 0.034683 | 1.847031 | 125 |
| 4 | 1 | 5 | 0.067885 | 0.155501 | 0.572215 | -0.945363 | -0.354104 | 0.536134 | -0.836580 | -1.216606 | -1.596519 | 0.088545 | 0.422942 | -0.761611 | -1.143910 | -1.335676 | -0.411166 | 1.278396 | -1.183058 | -0.450567 | 125 |
| unit | time | os1 | os2 | s2 | s3 | s4 | s6 | s7 | s8 | s9 | s10 | s11 | s12 | s13 | s14 | s15 | s17 | s20 | s21 | rul | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | -0.949386 | -1.038316 | 0.242928 | -0.838184 | -0.743908 | -0.700137 | 0.322429 | 0.013938 | -1.192453 | 0.019211 | 0.481647 | 0.459288 | 0.181946 | -1.319893 | -0.448464 | -0.471416 | -0.736388 | 0.459398 | 125 |
| 1 | 1 | 2 | -0.271022 | 0.412587 | 0.549340 | -0.759980 | -0.901197 | 0.711186 | -0.223175 | -0.566626 | -0.966775 | 0.541651 | -0.793310 | 0.899749 | -0.263669 | -0.851829 | 0.197927 | -0.943899 | -1.213522 | 1.644883 | 125 |
| 2 | 1 | 3 | 0.746255 | 0.864179 | 0.456540 | -1.358632 | -0.430256 | -0.172002 | -1.873362 | -1.336677 | -0.939530 | -0.201077 | -0.794553 | 0.046613 | -1.302880 | -1.247410 | 0.387682 | -0.880128 | 1.319888 | 0.219312 | 125 |
| 3 | 1 | 4 | 1.221067 | 0.867405 | 1.071817 | -0.848672 | 0.009502 | -1.184054 | 0.251898 | -1.310909 | -0.780769 | -0.155178 | 0.253375 | -0.436671 | -1.029844 | -1.374477 | 0.557052 | 0.661205 | -2.187156 | 1.039654 | 125 |
| 4 | 1 | 5 | 0.067749 | 0.158727 | -0.405778 | -1.803852 | 0.051462 | 0.536134 | 0.726475 | -1.216606 | -1.315802 | 0.088545 | -0.200725 | -0.034498 | -0.960764 | -0.990483 | -1.062757 | -1.821018 | 0.422298 | 0.235714 | 125 |
📌 Splitting Train and Validation Sets: out of the 259 engines in the training set, I will randomly hold out about 20% (52 engines) for validation.
ratio = 0.8 #Ratio of training and validation datasets
units = np.arange(1, no_units+1)
no_selected = round(ratio * engine_numbers)
train_units = list(np.random.choice(units,no_selected, replace = False))
val_units = list(set(units) - set(train_units))
print(val_units)
train_data = df_train[df_train['unit'].isin(train_units)].copy()
val_data = df_train[df_train['unit'].isin(val_units)].copy()
[128, 5, 10, 13, 141, 143, 144, 18, 20, 149, 150, 25, 155, 31, 39, 43, 46, 56, 184, 58, 186, 189, 62, 64, 73, 77, 78, 79, 80, 206, 211, 86, 216, 91, 219, 220, 97, 98, 101, 230, 104, 105, 233, 238, 114, 243, 244, 245, 248, 249, 124, 255]
📌 The sensor time series are noisy, so I smooth them with bias-corrected exponentially weighted averages. If the series values are $x_1, x_2, \ldots, x_n$, then the smoothed values $v_1, v_2, \ldots, v_n$ with parameter $\beta$ are computed as:
$$u_0 = 0, \qquad u_t = \beta\, u_{t-1} + (1-\beta)\, x_t, \qquad v_t = \frac{u_t}{1-\beta^{\,t}}$$
#Smoothing Function: Exponentially Weighted Averages
def smooth(s, b = 0.98):
v = np.zeros(len(s)+1) #v_0 is already 0.
bc = np.zeros(len(s)+1)
for i in range(1, len(v)): # recursive exponentially weighted average, bias-corrected below
v[i] = (b * v[i-1] + (1-b) * s[i-1])
bc[i] = 1 - b**i
sm = v[1:] / bc[1:]
return sm
# s = [1,2,3,4,5]
# print(s)
# print(f'After Smoothing: {smooth(s)}')
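For intuition, a small worked example (my own numbers, with $\beta = 0.5$ for readability): smoothing $x = [1, 2, 3]$ gives accumulator values $u = [0.5, 1.25, 2.125]$, bias-correction factors $1-\beta^t = [0.5, 0.75, 0.875]$, and smoothed values $v \approx [1.0, 1.667, 2.429]$; thanks to the bias correction, the first smoothed value equals the first observation exactly.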
#Smoothing each time series for each engine in both training and test sets
beta = 0.98
# if file_name=='FD002' or file_name=='FD004':
# beta = 0.8
# Verify data integrity
assert 'unit' in df_train.columns, "The 'unit' column is missing in df_train"
assert 'unit' in df_test.columns, "The 'unit' column is missing in df_test"
def smooth_series(df, beta, unit_col='unit', sensor_prefix='s'):
"""
Smooths time series for each engine and sensor column in the dataset.
"""
for col in df.columns:
if sensor_prefix in col:  # note: this substring check also matches 'os1' and 'os2', so the operational settings get smoothed as well
sm_list = []
for unit in df[unit_col].unique():
# Get sensor data for the current unit
unit_data = df[df[unit_col] == unit]
s = np.array(unit_data[col].copy())
sm = list(smooth(s, beta)) # Apply smoothing
sm_list.extend(sm) # Append smoothed data
# Check the length match
if len(sm_list) != len(df):
raise ValueError(f"Length mismatch for column {col}: sm_list={len(sm_list)}, df={len(df)}")
# Add smoothed column to DataFrame
df[col + '_smoothed'] = sm_list
return df
# Apply smoothing to training and test sets
df_train = smooth_series(df_train, beta)
df_test = smooth_series(df_test, beta)
Let's take a look at how the smoothed values (salmon) compare with the original series (light blue) for a particular engine (unit 10 from the training set).
df_train.columns
Index(['unit', 'time', 'os1', 'os2', 's2', 's3', 's4', 's6', 's7', 's8', 's9',
's10', 's11', 's12', 's13', 's14', 's15', 's17', 's20', 's21', 'rul',
'os1_smoothed', 'os2_smoothed', 's2_smoothed', 's3_smoothed',
's4_smoothed', 's6_smoothed', 's7_smoothed', 's8_smoothed',
's9_smoothed', 's10_smoothed', 's11_smoothed', 's12_smoothed',
's13_smoothed', 's14_smoothed', 's15_smoothed', 's17_smoothed',
's20_smoothed', 's21_smoothed'],
dtype='object')
sample_unit = 10
sample_df = df_train[df_train['unit'] == sample_unit].copy()
# Select sensor columns
sensor_cols = [c for c in df_train.columns if 's' in c and 'smoothed' not in c]
n_sensors = len(sensor_cols)
rows = (n_sensors + 2) // 3
# Create subplots
fig, axes = plt.subplots(rows, 3, figsize=(15, 5 * rows))
fig.tight_layout(pad=3.0)
# Flatten axes for easier iteration
axes = axes.flatten()
# Plot each sensor's data
for j, c in enumerate(sensor_cols):
ymin, ymax = sample_df[c].min(), sample_df[c].max()
axes[j].plot(sample_df['time'], sample_df[c], c='lightblue', label='original')
if c + '_smoothed' in sample_df.columns:
axes[j].plot(sample_df['time'], sample_df[c + '_smoothed'], c='salmon', label='smoothed')
axes[j].plot([10, 10], [ymin, ymax], c='black') # Vertical line
axes[j].set_title(c)
axes[j].legend()
# Hide unused subplots
for ax in axes[n_sensors:]:
ax.axis('off')
plt.savefig(f'figures\\Smoothing_Sensors_for_dataset_{file_name}.png', dpi=300, bbox_inches='tight')
plt.show()
#Remove the original series
for c in df_train.columns:
if ('s' in c) and ('smoothed' not in c):
df_train[c] = df_train[c+'_smoothed']
df_train.drop(c+'_smoothed', axis = 1, inplace = True)
for c in df_test.columns:
if ('s' in c) and ('smoothed' not in c):
df_test[c] = df_test[c+'_smoothed']
df_test.drop(c+'_smoothed', axis = 1, inplace = True)
for df in [df_train, df_test]:
display(df.head())
| unit | time | os1 | os2 | s2 | s3 | s4 | s6 | s7 | s8 | s9 | s10 | s11 | s12 | s13 | s14 | s15 | s17 | s20 | s21 | rul | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | 0.745835 | 0.864179 | -1.091041 | -1.442268 | 0.767886 | -0.172002 | 0.414295 | -1.409091 | -0.837981 | -0.201077 | 0.161557 | 0.099799 | -1.403971 | -1.540288 | 0.358858 | -0.167936 | -0.889803 | -1.350077 | 125 |
| 1 | 1 | 2 | 0.985564 | 0.865483 | -0.313553 | -0.825935 | 0.245366 | -0.683140 | 0.067675 | -1.376489 | -1.088595 | -0.177896 | 0.186785 | -0.141493 | -1.309474 | -1.289565 | 0.207455 | -0.466441 | -1.264496 | -1.331938 | 125 |
| 2 | 1 | 3 | 0.673431 | 0.625671 | 0.215760 | -0.927780 | 0.017504 | -1.082935 | 0.261350 | -1.308934 | -1.060849 | -0.087282 | -0.059213 | -0.441410 | -1.215796 | -1.208786 | -0.020281 | 0.126960 | -1.388457 | -0.293445 | 125 |
| 3 | 1 | 4 | 0.814536 | 0.688446 | 0.043695 | -0.708883 | -0.035481 | -1.108986 | 0.094600 | -1.309443 | -1.131271 | -0.104774 | -0.248334 | -0.198175 | -1.256223 | -1.223711 | -0.168604 | -0.284218 | -1.021819 | 0.257998 | 125 |
| 4 | 1 | 5 | 0.659112 | 0.577508 | 0.153713 | -0.758109 | -0.101806 | -0.766535 | -0.099236 | -1.290118 | -1.228118 | -0.064532 | -0.108600 | -0.315461 | -1.232844 | -1.247018 | -0.219096 | 0.041058 | -1.055383 | 0.110502 | 125 |
| unit | time | os1 | os2 | s2 | s3 | s4 | s6 | s7 | s8 | s9 | s10 | s11 | s12 | s13 | s14 | s15 | s17 | s20 | s21 | rul | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | -0.949386 | -1.038316 | 0.242928 | -0.838184 | -0.743908 | -0.700137 | 0.322429 | 0.013938 | -1.192453 | 0.019211 | 0.481647 | 0.459288 | 0.181946 | -1.319893 | -0.448464 | -0.471416 | -0.736388 | 0.459398 | 125 |
| 1 | 1 | 2 | -0.606778 | -0.305537 | 0.397681 | -0.798687 | -0.823346 | 0.012652 | 0.046872 | -0.279276 | -1.078474 | 0.283069 | -0.162270 | 0.681743 | -0.043112 | -1.083497 | -0.122004 | -0.710043 | -0.977365 | 1.058128 | 125 |
| 2 | 1 | 3 | -0.146625 | 0.092271 | 0.417699 | -0.989118 | -0.689660 | -0.050147 | -0.606180 | -0.638888 | -1.031221 | 0.118416 | -0.377303 | 0.465742 | -0.471546 | -1.139242 | 0.051335 | -0.767887 | -0.196092 | 0.772855 | 125 |
| 3 | 1 | 4 | 0.205728 | 0.291966 | 0.586217 | -0.952936 | -0.509538 | -0.342271 | -0.385117 | -0.812018 | -0.966698 | 0.047931 | -0.214824 | 0.233257 | -0.615379 | -1.199845 | 0.181621 | -0.399716 | -0.709043 | 0.841589 | 125 |
| 4 | 1 | 5 | 0.177007 | 0.264231 | 0.379721 | -1.130064 | -0.392759 | -0.159421 | -0.153726 | -0.896238 | -1.039368 | 0.056385 | -0.211889 | 0.177520 | -0.687275 | -1.156264 | -0.077411 | -0.695576 | -0.473541 | 0.715469 | 125 |
📌 When we look at the trajectory length of each unit in the training and test sets (259 engines each), the training set, which contains complete run-to-failure trajectories, has a minimum length of 128 cycles, whereas the test set has a minimum of only 21 cycles. Since we predict the final RUL of every test engine from its last window, the window size cannot exceed the shortest test trajectory. Furthermore, in the smoothed-series plots above I drew a black vertical line at roughly time step 10: the exponentially weighted average is still unstable in those first cycles, so I exclude them from the training windows (the same early-cycle instability affects the test data as well). With both constraints in mind, I use a window size of 15.
print('training set time cycles:')
display(df_train.groupby('unit')['time'].max().describe())
print('test set time cycles:')
display(df_test.groupby('unit')['time'].max().describe())
training set time cycles:
count    259.000000
mean     206.343629
std       46.374880
min      128.000000
25%      174.000000
50%      199.000000
75%      229.500000
max      378.000000
Name: time, dtype: float64
test set time cycles:
count    259.000000
mean     131.239382
std       63.085925
min       21.000000
25%       76.500000
50%      132.000000
75%      168.000000
max      367.000000
Name: time, dtype: float64
n_features = len([c for c in df_train.columns if 's' in c]) + 1 # every column containing 's' (the smoothed sensors plus os1/os2), plus one for time
window = 15
print(f'number of features: {n_features}, window size: {window}')
number of features: 19, window size: 15
📌 Selecting window start indices for the training and validation sets: a row may start a window only if its unit has at least window − 1 more cycles after it (rul ≥ window − 1) and it lies beyond the unstable first 10 cycles (time > 10).
train_indices = list(train_data[(train_data['rul'] >= (window - 1)) & (train_data['time'] > 10)].index)
val_indices = list(val_data[(val_data['rul'] >= (window - 1)) & (val_data['time'] > 10)].index)
📌 Normalize the RUL values by dividing by the maximum value (125).
rul_max = max(df_train['rul'])
df_train['rul'] = df_train['rul'] / rul_max
df_test['rul'] = df_test['rul'] / rul_max
rul_max
125
📌 Z-Normalize the time index
# max_time = max(df_train['time'])
df_train_mean = df_train['time'].mean()
df_train_std = df_train['time'].std()
df_train['time'] = (df_train['time'] - df_train_mean) / df_train_std
df_test['time'] = (df_test['time'] - df_train_mean) / df_train_std
display(df_train)
| unit | time | os1 | os2 | s2 | s3 | s4 | s6 | s7 | s8 | s9 | s10 | s11 | s12 | s13 | s14 | s15 | s17 | s20 | s21 | rul | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | -1.564948 | 0.745835 | 0.864179 | -1.091041 | -1.442268 | 0.767886 | -0.172002 | 0.414295 | -1.409091 | -0.837981 | -0.201077 | 0.161557 | 0.099799 | -1.403971 | -1.540288 | 0.358858 | -0.167936 | -0.889803 | -1.350077 | 1.000 |
| 1 | 1 | -1.550439 | 0.985564 | 0.865483 | -0.313553 | -0.825935 | 0.245366 | -0.683140 | 0.067675 | -1.376489 | -1.088595 | -0.177896 | 0.186785 | -0.141493 | -1.309474 | -1.289565 | 0.207455 | -0.466441 | -1.264496 | -1.331938 | 1.000 |
| 2 | 1 | -1.535930 | 0.673431 | 0.625671 | 0.215760 | -0.927780 | 0.017504 | -1.082935 | 0.261350 | -1.308934 | -1.060849 | -0.087282 | -0.059213 | -0.441410 | -1.215796 | -1.208786 | -0.020281 | 0.126960 | -1.388457 | -0.293445 | 1.000 |
| 3 | 1 | -1.521422 | 0.814536 | 0.688446 | 0.043695 | -0.708883 | -0.035481 | -1.108986 | 0.094600 | -1.309443 | -1.131271 | -0.104774 | -0.248334 | -0.198175 | -1.256223 | -1.223711 | -0.168604 | -0.284218 | -1.021819 | 0.257998 | 1.000 |
| 4 | 1 | -1.506913 | 0.659112 | 0.577508 | 0.153713 | -0.758109 | -0.101806 | -0.766535 | -0.099236 | -1.290118 | -1.228118 | -0.064532 | -0.108600 | -0.315461 | -1.232844 | -1.247018 | -0.219096 | 0.041058 | -1.055383 | 0.110502 | 1.000 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 53438 | 259 | 1.336785 | -0.059230 | -0.068402 | 0.378548 | 0.252621 | 0.345081 | 0.118842 | -0.485956 | 0.029274 | -0.230880 | 0.012758 | 0.432511 | -0.404250 | 0.024589 | -0.304203 | 0.503069 | 0.325914 | -0.372547 | -0.273579 | 0.032 |
| 53439 | 259 | 1.351294 | -0.033183 | -0.049430 | 0.401107 | 0.304912 | 0.372102 | 0.092337 | -0.491335 | 0.008852 | -0.235612 | 0.009342 | 0.468046 | -0.417451 | 0.006309 | -0.313110 | 0.538514 | 0.361627 | -0.352962 | -0.276555 | 0.024 |
| 53440 | 259 | 1.365802 | -0.031118 | -0.045282 | 0.402882 | 0.353291 | 0.402069 | 0.101362 | -0.526965 | -0.000307 | -0.238901 | 0.010953 | 0.493471 | -0.431548 | -0.004419 | -0.312021 | 0.560888 | 0.380271 | -0.353520 | -0.306808 | 0.016 |
| 53441 | 259 | 1.380311 | -0.029100 | -0.041220 | 0.406324 | 0.402557 | 0.451432 | 0.110201 | -0.484686 | -0.015580 | -0.250492 | 0.012530 | 0.540803 | -0.461318 | -0.018649 | -0.313141 | 0.604578 | 0.414282 | -0.383076 | -0.321193 | 0.008 |
| 53442 | 259 | 1.394820 | -0.003704 | -0.022787 | 0.440027 | 0.437856 | 0.501191 | 0.083898 | -0.512307 | -0.038488 | -0.254639 | 0.009122 | 0.579207 | -0.493631 | -0.038566 | -0.323499 | 0.653457 | 0.433732 | -0.429148 | -0.337808 | 0.000 |
53443 rows × 21 columns
📌 Prepare Training, Validation and Test Dataloaders.
For the training process, I will take batches of 256 windows.
For the validation process, I will iterate over all the available windows in the validation set (in batches of 128).
For the test process, I will take the LAST window of each engine's given trajectory, so there are exactly 259 X's (windows of size 15) and 259 y's (final RULs).
class data(Dataset):
def __init__(self, list_indices, df_train):
self.indices = list_indices
self.df_train = df_train
def __len__(self):
return len(self.indices)
def __getitem__(self, idx):
ind = self.indices[idx]
X_ = self.df_train.iloc[ind : ind + window, :].drop(['unit','rul'], axis = 1).copy().to_numpy()
y_ = self.df_train.iloc[ind + window - 1]['rul']
return X_, y_
train = data(train_indices, df_train)
val = data(val_indices, df_train)
trainloader = DataLoader(train, batch_size = 256, shuffle = True)
valloader = DataLoader(val, batch_size = 128, shuffle = True)
units = np.arange(1, no_units+1)
class test(Dataset):
def __init__(self, units, df_test):
self.units = units
self.df_test = df_test
def __len__(self):
return len(self.units)
def __getitem__(self, idx):
n = self.units[idx]
U = self.df_test[self.df_test['unit'] == n].copy()
X_ = U.reset_index().iloc[-window:,:].drop(['index', 'unit','rul'], axis = 1).copy().to_numpy()
y_ = U['rul'].min()
return X_, y_
test = test(units, df_test)
testloader = DataLoader(test, batch_size = 100)
dataiter = iter(trainloader)
x,y = next(dataiter)
x.shape
torch.Size([256, 15, 19])
2. Model Building and Training
📌 I will be using a model incorporating an attention mechanism with pairwise interactions and positional-awareness layers before the final output.
import matplotlib.pyplot as pyplot
import torch.optim.lr_scheduler as lr_scheduler
def anderson(f, x0, m=5, lam=1e-4, max_iter=50, tol=1e-2, beta=1.0, verbose=False):
"""Improved Anderson acceleration for fixed-point iteration."""
# Shape parameters
batch, channels, dim = x0.shape
# Storage for historical X and F
X = torch.zeros(batch, m, channels * dim, dtype=x0.dtype, device=x0.device)
F = torch.zeros(batch, m, channels * dim, dtype=x0.dtype, device=x0.device)
# Initialize X and F with the first two iterations
X[:, 0], F[:, 0] = x0.view(batch, -1), f(x0).view(batch, -1)
X[:, 1], F[:, 1] = F[:, 0], f(F[:, 0].view(batch, channels, dim)).view(batch, -1)
# Prepare the H matrix and y vector for Anderson acceleration
H = torch.zeros(batch, m + 1, m + 1, dtype=x0.dtype, device=x0.device)
H[:, 0, 1:] = H[:, 1:, 0] = 1 # First row and column for normalization
y = torch.zeros(batch, m + 1, 1, dtype=x0.dtype, device=x0.device)
y[:, 0, 0] = 1 # y vector for the Anderson step
res = [] # To store residuals
for k in range(2, max_iter):
n = min(k, m) # Number of iterations to consider
# Compute residual matrix G
G = F[:, :n] - X[:, :n]
# Compute the H matrix (Gramian + regularization)
GTG = torch.bmm(G, G.transpose(1, 2))
H[:, 1:n+1, 1:n+1] = GTG + lam * torch.eye(n, dtype=x0.dtype, device=x0.device)[None]
# Solve for alpha using least squares
try:
alpha = torch.linalg.solve(H[:, :n+1, :n+1], y[:, :n+1])[:, 1:n+1, 0] # (batch x n)
except RuntimeError as e:
if verbose:
print(f"Solver failed at iteration {k}: {e}")
break
# Update X and F
X_update = beta * (alpha[:, None] @ F[:, :n])[:, 0] + (1 - beta) * (alpha[:, None] @ X[:, :n])[:, 0]
idx = k % m
X[:, idx] = X_update
F[:, idx] = f(X[:, idx].view(batch, channels, dim)).view(batch, -1)
# Compute residuals
residual = (F[:, idx] - X[:, idx]).norm(dim=1) / (1e-5 + X[:, idx].norm(dim=1).clamp(min=1e-5))
res.append(residual.mean().item()) # Mean residual across batch
if verbose:
print(f"Iteration {k}, residual: {res[-1]:.6f}")
# Check for convergence
if res[-1] < tol:
if verbose:
print(f"Converged at iteration {k}, residual: {res[-1]:.6f}")
break
return X[:, idx].view(batch, channels, dim), (res, k)
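As a quick sanity check (my own toy example, not part of the original pipeline), the solver can be applied to a simple contraction whose fixed point is known in closed form:
# Toy fixed-point problem: z = 0.5 * z + 1 has the unique solution z = 2 everywhere.
g = lambda z: 0.5 * z + 1.0
z0 = torch.zeros(2, 3, 4)                      # (batch, channels, dim), the shape anderson() expects
z_star, (residuals, n_iter) = anderson(g, z0)  # converges in a couple of iterations
print(z_star.mean().item())                    # ~2.0
print(n_iter, residuals[-1])                   # iterations used and the final relative residual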
import torch.autograd as autograd
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('Using device:', device)
class DEQFixedPoint(nn.Module):
def __init__(self, f, solver, **kwargs):
super().__init__()
self.f = f
self.solver = solver
self.kwargs = kwargs
def forward(self, x):
# Compute forward pass and re-engage autograd tape
b, c, f = x.shape
with torch.no_grad(): # Ensure no gradients for intermediate calculations
z, self.stats = self.solver(lambda z: self.f(z, x), torch.zeros((b, 4 * c, f), device=device), **self.kwargs)
z = self.f(z, x)
# Set z to require gradients before the hook
z = z.requires_grad_() # Ensure z requires gradients
# Set up Jacobian vector product (without additional forward calls)
z0 = z.clone().detach().requires_grad_() # Ensure z0 requires gradients
f0 = self.f(z0, x)
def backward_hook(grad):
# Perform the backward pass using the solver
g, self.backward_res = self.solver(
lambda y: autograd.grad(f0, z0, y, retain_graph=True)[0] + grad,
grad, **self.kwargs)
return g
# Register the backward hook
z.register_hook(backward_hook)
return z, self.stats
Using device: cuda
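For reference, a short summary (following the standard Deep Equilibrium formulation) of what DEQFixedPoint does: the forward pass finds an equilibrium $z^\star = f_\theta(z^\star, x)$ with the Anderson solver, and the backward hook differentiates through that equilibrium implicitly, without backpropagating through the solver iterations:
$$\frac{\partial \ell}{\partial (\cdot)} = \frac{\partial \ell}{\partial z^\star}\left(I - \frac{\partial f_\theta(z^\star, x)}{\partial z}\right)^{-1}\frac{\partial f_\theta(z^\star, x)}{\partial (\cdot)}$$
Instead of forming the inverse explicitly, the hook reuses the same Anderson solver to find the fixed point $u = u\,\frac{\partial f_\theta}{\partial z} + \frac{\partial \ell}{\partial z^\star}$, and returns $u$ as the gradient flowing back.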
class DualInputAttention(nn.Module):
def __init__(self, channels, feature_size):
super(DualInputAttention, self).__init__()
self.feature_size = feature_size
self.query_proj = nn.Linear(self.feature_size, self.feature_size)
self.key_proj = nn.Linear(self.feature_size, self.feature_size)
self.value_proj = nn.Linear(self.feature_size, self.feature_size)
self.output_proj = nn.Linear(self.feature_size, self.feature_size)
self.query_proj.weight.data.normal_(0, 0.01)
self.key_proj.weight.data.normal_(0, 0.01)
self.value_proj.weight.data.normal_(0, 0.01)
self.output_proj.weight.data.normal_(0, 0.01)
def forward(self, input1, input2):
"""
Args:
input1: Tensor of shape (batch, channels, feature_size)
input2: Tensor of shape (batch, channels, feature_size)
Returns:
Tensor of shape (batch, channels, feature_size), attended features.
"""
# Project inputs
query = self.query_proj(input1) # (batch, channels, feature_size)
key = self.key_proj(input2) # (batch, channels, feature_size)
value = self.value_proj(input2) # (batch, channels, feature_size)
# Compute similarity (scaled dot product)
scores = torch.einsum('bci,bcj->bcij', query, key) # (batch, channels, feature_size, feature_size)
scores = scores / (self.feature_size ** 0.5)
# Compute attention weights
attention_weights = F.softmax(scores, dim=-1) # (batch, channels, feature_size, feature_size)
# Compute attended values
attended = torch.einsum('bcij,bcj->bci', attention_weights, value) # (batch, channels, feature_size)
# Combine with input1
output = self.output_proj(attended + input1) # (batch, channels, feature_size)
return output
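In equation form, the pairwise interaction computed by the two einsum calls above is, per batch element $b$ and channel $c$ (with $d$ = feature_size and $Q$, $K$, $V$ the linear projections of the two inputs):
$$A_{b,c,i,j} = \mathrm{softmax}_j\!\left(\frac{Q_{b,c,i}\,K_{b,c,j}}{\sqrt{d}}\right), \qquad \mathrm{attended}_{b,c,i} = \sum_{j} A_{b,c,i,j}\, V_{b,c,j}$$
The attended features are then added back to input1 (a residual connection) and passed through the output projection.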
class DELayer(nn.Module):
def __init__(self, n_channels, feature_size, kernel_size=3):
super().__init__()
num_groups = 4
self.conv0 = nn.Conv1d(n_channels, 2 * n_channels, kernel_size=1, padding='same', bias=False)
self.conv1 = nn.Conv1d(2 * n_channels, 2 * n_channels, kernel_size=kernel_size, padding='same', bias=False)
self.conv2 = nn.Conv1d(2 * n_channels, 2 * n_channels, kernel_size=kernel_size, padding='same', bias=False)
self.norm1 = nn.GroupNorm(num_groups, 4 * n_channels)
self.norm2 = nn.GroupNorm(num_groups, 4 * n_channels)
# self.norm3 = nn.GroupNorm(num_groups, 2 * n_channels)
self.attention = DualInputAttention(4 * n_channels, feature_size)
self.conv0.weight.data.normal_(0, 0.01)
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
def forward(self, z0, x):
# z = self.conv2(self.norm1(F.relu(self.conv1(z0))))
# out = self.attention (self.norm2(z),self.norm3(x))
# z = self.norm4(F.relu(out))
x = F.relu(self.conv0(x))
x_conv1 = F.relu(self.conv1(x))
x_conv2 = F.relu(self.conv2(x_conv1))
x_concat = torch.cat([x_conv1, x_conv2], dim=1)
x = self.norm1(x_concat)
# z = self.norm2(z0)
z = self.norm2(F.relu(self.attention(z0, x)))
return z
# Define the Bayesian Neural Network with Dropout
class BNN(nn.Module):
def __init__(self, initial_channels, hidden_dim, dropout):
super(BNN, self).__init__()
self.dropout_value = dropout
self.fc1 = nn.Linear(4 * initial_channels, 8* hidden_dim)
self.dropout1 = nn.Dropout(self.dropout_value) # Dropout for Monte Carlo Dropout
self.fc2 = nn.Linear(8 * hidden_dim, 4 * hidden_dim)
self.dropout2 = nn.Dropout(self.dropout_value)
self.fc3 = nn.Linear(4 * hidden_dim, 1)
def forward(self, x, training=True):
x = torch.relu(self.fc1(x))
x = self.dropout1(x) if training else x # Apply dropout during training and inference
x = torch.relu(self.fc2(x))
x = self.dropout2(x) if training else x # Apply dropout during training and inference
return self.fc3(x)
class RegressorModel(nn.Module):
def __init__(self, initial_channels, feature_size, hidden_dim, dropout):
super(RegressorModel, self).__init__()
# Layers
# self.conv =nn.Conv1d(initial_channels, 2 * initial_channels, kernel_size=1, bias=False)
# self.norm = nn.GroupNorm(4, 2 * initial_channels)
self.f = DELayer(n_channels= initial_channels, feature_size=feature_size )
self.DEQ = DEQFixedPoint(self.f, anderson, tol=1e-4, max_iter=200, beta=1.0)
self.pool = nn.AdaptiveAvgPool1d(1)
self.feedforward = BNN(initial_channels, hidden_dim, dropout)
def forward(self, x, training=True):
# x = self.norm(F.relu(self.conv(x)))
x = self.DEQ(x)
x = self.pool(x[0]).squeeze(-1) # Apply Deep Equilibrium Model
# x = torch.flatten(x, start_dim=1)
output = self.feedforward(x, training) # Apply feedforward Model for regression
return output
learning_rate = 1e-3
init_channels = window
inner_channels = window
n_hidden_units = window
set_dropout = 0.5
model = RegressorModel(initial_channels=init_channels, feature_size=n_features, hidden_dim=n_hidden_units, dropout=set_dropout ).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
X= torch.randn((64, window, n_features), device=device)
f = DELayer(n_channels=init_channels, feature_size= n_features).to(device)
DEQ = DEQFixedPoint(f, anderson, tol=1e-4, max_iter=100, beta=0.9).to(device)
out = DEQ(X)[0]
print(out.shape)
out2 = nn.AdaptiveAvgPool1d(1)(out).squeeze(-1)
print(out2.shape)
out3 = model(X)[0]
print(out3.shape)
torch.Size([64, 60, 19])
torch.Size([64, 60])
torch.Size([1])
📌 Validation and Test Functions
# def validation(loss_fn, num_samples=100):
# model.train() # Enable stochastic behavior (dropout during inference)
# total_loss = 0.0
# total_samples = 0
# with torch.no_grad():
# for X, y in valloader:
# X, y = X.to(device).float(), y.to(device).float()
# stochastic_predictions = []
# # Perform stochastic forward passes
# for _ in range(num_samples):
# y_pred = model(X, training=True).squeeze()
# stochastic_predictions.append(y_pred.cpu().numpy())
# # Convert to NumPy array
# stochastic_predictions = np.array(stochastic_predictions) # Shape: (num_samples, batch_size)
# # Calculate mean prediction
# mean_pred = torch.tensor(np.mean(stochastic_predictions, axis=0)).to(device)
# # Compute batch loss
# loss = loss_fn(mean_pred, y)
# total_loss += loss.item() * X.size(0) # Accumulate the weighted batch loss
# total_samples += X.size(0)
# avg_val_loss = total_loss / total_samples # Calculate average loss
# return avg_val_loss
def validation(loss_fn):
model.eval() # Set the model to evaluation mode
total_loss = 0.0
total_samples = 0
with torch.no_grad(): # Disable gradient computation for validation
for X, y in valloader: # Iterate over all batches in the validation loader
X, y = X.to(device).to(torch.float32), y.to(device).to(torch.float32)
# Use enable_grad for specific models if required
if isinstance(model, RegressorModel): # Replace with the DEQ model class
with torch.enable_grad():
y_pred = model(X).squeeze()
else:
y_pred = model(X).squeeze()
y = y.squeeze()
loss = loss_fn(y_pred, y)
total_loss += loss.item() * X.size(0) # Accumulate the weighted batch loss
total_samples += X.size(0) # Count the total samples processed
avg_val_loss = total_loss / total_samples # Calculate average loss
return avg_val_loss
loss_L1 = nn.L1Loss()
def test():
model.train() # Enable stochastic behavior (dropout during inference)
total_loss_MSE = 0.0
total_loss_L1 = 0.0
total_ASUE = 0.0
all_stochastic_predictions = []
all_pred_values = []
all_true_values = []
all_uncertainties = [] # Collect uncertainties
num_samples = 100
with torch.no_grad():
for X, y in testloader:
X, y = X.to(device).float(), y.to(device).float()
stochastic_predictions = []
# Perform stochastic forward passes
for _ in range(num_samples):
y_pred = model(X, training=True).squeeze()
stochastic_predictions.append(y_pred.cpu().numpy())
# Convert to NumPy array
stochastic_predictions = np.array(stochastic_predictions) # Shape: (num_samples, batch_size)
# Calculate mean and uncertainty
mean_pred = torch.tensor(np.mean(stochastic_predictions, axis=0)).to(device)
uncertainty_batch = np.var(stochastic_predictions, axis=0) # Variance as uncertainty
# Compute batch losses
loss_MSE = torch.mean((mean_pred - y) ** 2).item()
loss_L1_val = loss_L1(mean_pred, y).item()
ASUE = torch.mean(torch.relu(y - mean_pred)).item()
# Aggregate results
total_loss_MSE += loss_MSE * X.size(0)
total_loss_L1 += loss_L1_val * X.size(0)
total_ASUE += ASUE * X.size(0)
# Collect predictions, true values, and uncertainties
all_stochastic_predictions.append(stochastic_predictions)
all_pred_values.append(mean_pred.cpu().numpy())
all_true_values.append(y.cpu().numpy())
all_uncertainties.append(uncertainty_batch)
# Average metrics
total_samples = len(testloader.dataset)
avg_loss_MSE = total_loss_MSE / total_samples
avg_loss_L1 = total_loss_L1 / total_samples
avg_ASUE = total_ASUE / total_samples
# Concatenate all predictions, uncertainties, and true values
all_pred_values = np.concatenate([x.flatten() for x in all_pred_values], axis=0)
all_stochastic_predictions = np.concatenate(all_stochastic_predictions, axis=1)
all_true_values = np.concatenate(all_true_values, axis=0)
all_uncertainties = np.concatenate(all_uncertainties, axis=0) # Ensure consistency
return avg_loss_MSE, avg_loss_L1, avg_ASUE, all_uncertainties, all_stochastic_predictions, all_pred_values, all_true_values
📌 Training Loop: I trained with the Adam optimizer for 35 epochs, learning rate = 0.01, and a scheduler that decays the learning rate by a factor of 0.5 every 8 epochs.
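The optimizer and scheduler are defined in an earlier cell; a minimal sketch matching the description above (assuming Adam with a StepLR schedule, not the authoritative setup) would be:
# Assumed optimizer/scheduler setup: Adam with lr = 0.01,
# halving the learning rate every 8 epochs via StepLR
optimizer = optim.Adam(model.parameters(), lr=0.01)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.5)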
# Initialize lists for tracking losses
T, V = [], []
epochs = 35
loss_fn = nn.MSELoss()
siterations, sresiduals = [], []
stats = {}
# Define forward hook for DEQ statistics
def forward_hook(module, input, output):
stats[module] = output
# Register forward hook
hook_handle = model.DEQ.register_forward_hook(forward_hook)
best_val_loss = float('inf') # Track best validation loss
pbar = tqdm(range(epochs), desc="Training Progress", dynamic_ncols=True)
for epoch in pbar:
model.train()
epoch_loss = 0
sresiduals_list, siterations_list = [], []
for X, y in trainloader:
X, y = X.to(device, dtype=torch.float32), y.to(device, dtype=torch.float32)
# Forward pass
y_pred = model(X).squeeze()
y = y.squeeze()
loss = loss_fn(y_pred, y)
epoch_loss += loss.item()
# Backward pass
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Collect DEQ statistics
iterations = stats.get(model.DEQ, (None, [0, 0]))[1]
if iterations and len(iterations) >= 2:
sresiduals_list.append(np.mean(iterations[0]))
siterations_list.append(iterations[1])
# Step the scheduler
scheduler.step()
# Validation loss
val_loss = validation(loss_fn)
T.append(epoch_loss / len(trainloader))
V.append(val_loss)
# Update tqdm bar
pbar.set_postfix({'Train Loss': T[-1], 'Val Loss': val_loss})
# Track DEQ statistics
sresiduals.append(np.mean(sresiduals_list) if sresiduals_list else 0)
siterations.append(np.mean(siterations_list) if siterations_list else 0)
# Save best model
if val_loss < best_val_loss:
best_val_loss = val_loss
torch.save(model.state_dict(), 'best_model.pth')
print(f'Epoch {epoch + 1}/{epochs} - Train Loss: {T[-1]:.4f}, Val Loss: {V[-1]:.4f}')
# Clean up hook handle
hook_handle.remove()
print(sresiduals)
print(siterations)
torch.save(model.state_dict(), f'saved_models\\DEM_{file_name}')
# Perform testing with Monte Carlo Dropout
mse, l1, asue, uncertainty, all_y_pred, y_pred, y = test()
# Calculate RMSE and scale by RUL max value
rmse = rul_max * np.sqrt(mse)
# Print results
print(f"Test Results:\n"
f"RMSE: {round(rmse, 2)}\n"
f"L1 Loss: {round(l1, 2)}\n"
f"ASUE: {round(asue, 2)}")
# Mean uncertainty
mean_uncertainty = np.mean(uncertainty)
print(f"Mean Uncertainty: {round(mean_uncertainty, 4)}")
# Optional: Visualize predictions, true values, and uncertainties
import matplotlib.pyplot as plt
plt.figure(figsize=(14, 8))
# True vs predicted RUL
plt.plot(range(len(y)), y, label="True Values", c="lightseagreen", marker=".", alpha=0.7)
plt.plot(range(len(y_pred)), y_pred, label="Predicted Values", c="salmon", marker=".", alpha=0.7)
# Uncertainty as error bars
plt.fill_between(
range(len(y_pred)),
y_pred - np.sqrt(uncertainty),
y_pred + np.sqrt(uncertainty),
color="salmon", alpha=0.2, label="Uncertainty"
)
# Add labels and legend
plt.xlabel("Sample Index", fontsize=14)
plt.ylabel("RUL", fontsize=14)
plt.title("True vs Predicted RUL with Uncertainty", fontsize=16)
plt.legend(fontsize=12)
plt.grid(True)
plt.savefig(f'figures\\Responses_with_Uncertainty_for_dataset_{file_name}.png', dpi=300, bbox_inches='tight')
plt.show()
print(y)
print(y_pred)
print(1*(y_pred>y))
print(all_y_pred.shape)
print(all_y_pred)
# Create a figure and axes
fig, ax = plt.subplots(1, 1, figsize=(14, 8))
# Plot predictions
ax.plot(np.arange(1, y_pred.shape[0] + 1), y_pred, label='Predictions', color='salmon', marker='.')
# Plot true values
ax.plot(np.arange(1, y.shape[0] + 1), y, label='True Values', color='lightseagreen', marker='.')
# Set limits, labels, grid, and legend
ax.set_ylim([0, 1])
ax.set_xlabel('Test Engine Units', fontsize=16)
ax.set_ylabel('RUL', fontsize=16)
ax.grid(True)
ax.legend()
plt.savefig(f'figures\\Responses_without_Uncertainty_for_dataset_{file_name}.png', dpi=300, bbox_inches='tight')
# Show the plot
plt.show()
plt.plot(np.arange(1,len(T)+1), T, label= 'Train loss')
plt.plot(np.arange(1,len(V)+1), V, label = 'Validation loss')
plt.legend()
plt.grid()
plt.xlabel('Epochs')
plt.ylabel('MSE')
plt.savefig(f'figures\\Training_for_dataset_{file_name}.png', dpi=300, bbox_inches='tight')
plt.show()
fig, ax = plt.subplots(figsize = (12,8))
def animate(i):
ax.clear()
line1, = ax.plot(np.arange(1,i+1), T[:i], label = 'train_loss')
line2, = ax.plot(np.arange(1,i+1), V[:i], label = 'val_loss')
ax.legend()
ax.grid(True)
    ax.set_xlim(0, len(T) + 1)
    ax.set_ylim(0, 1.05 * max(max(T), max(V)))
ax.set_xlabel('epochs')
ax.set_ylabel('MSE')
return line1, line2
animation = FA(fig, animate, np.arange(1,len(T)+1), interval = 50)
%time animation.save('animation3.gif', writer='imagemagick', fps=20)
plt.close(fig)
# Count total parameters
total_params = sum(p.numel() for p in model.parameters())
print(f"Total number of parameters: {total_params}")
# Count trainable parameters only
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Trainable parameters: {trainable_params}")
from torchinfo import summary
summary(model, input_size=(64, window, n_features))
3. Prediction on Test Set
model.load_state_dict(torch.load(f'saved_models\\DEM_{file_name}', map_location=torch.device(device), weights_only=True))
<All keys matched successfully>
PHM08 Score Metric
def calculate_phm08_score(true_rul, predicted_rul, alpha=13, beta=10):
"""
Calculate the PHM08 score metric for RUL prediction.
Parameters:
true_rul (array-like): Array of true RUL values.
predicted_rul (array-like): Array of predicted RUL values.
alpha (float): Scaling factor for early predictions (default=13).
beta (float): Scaling factor for late predictions (default=10).
Returns:
float: Total PHM08 score.
"""
true_rul = np.array(true_rul)
predicted_rul = np.array(predicted_rul)
if true_rul.shape != predicted_rul.shape:
raise ValueError("Shape mismatch between true and predicted RUL arrays")
d = (predicted_rul - true_rul) * MAX_RUL
score = np.where(d < 0, np.exp(-d / alpha) - 1, np.exp(d / beta) - 1)
return np.sum(score)
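As a quick, hypothetical sanity check of the metric's asymmetry (assuming MAX_RUL is already defined in an earlier cell), a late prediction should be penalized more heavily than an equally sized early one:
# Hypothetical example: RULs are normalized, so a 0.1 offset corresponds to 0.1 * MAX_RUL cycles
true_rul = np.array([0.5])
early_score = calculate_phm08_score(true_rul, np.array([0.4]))  # under-prediction (early)
late_score = calculate_phm08_score(true_rul, np.array([0.6]))   # over-prediction (late)
print(f'Early: {early_score:.2f}, Late: {late_score:.2f}')  # late_score > early_score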
num_samples = 200
def test(num_samples=num_samples):
model.train() # Enable stochastic behavior (dropout during inference)
total_loss_MSE = 0.0
total_loss_L1 = 0.0
total_ASUE = 0.0
total_phm08_score = 0.0
all_stochastic_predictions = []
all_pred_values = []
all_true_values = []
all_uncertainties = [] # Collect uncertainties
with torch.no_grad():
for X, y in testloader:
X, y = X.to(device).float(), y.to(device).float()
stochastic_predictions = []
# Perform stochastic forward passes
for _ in range(num_samples):
y_pred = model(X, training=True).squeeze()
stochastic_predictions.append(y_pred.cpu().numpy())
# Convert to NumPy array
stochastic_predictions = np.array(stochastic_predictions) # Shape: (num_samples, batch_size)
            # Calculate mean prediction and its uncertainty across stochastic passes
            mean_pred = torch.tensor(np.mean(stochastic_predictions, axis=0)).to(device)
            uncertainty_batch = np.std(stochastic_predictions, axis=0)  # Standard deviation as uncertainty
# Compute batch losses
loss_MSE = torch.mean((mean_pred - y) ** 2).item()
loss_L1_val = loss_L1(mean_pred, y).item()
ASUE = torch.mean(torch.relu(y - mean_pred)).item()
# Compute PHM08 score
phm08_score = calculate_phm08_score(y.cpu().numpy(), mean_pred.cpu().numpy())
# Aggregate results
total_loss_MSE += loss_MSE * X.size(0)
total_loss_L1 += loss_L1_val * X.size(0)
total_ASUE += ASUE * X.size(0)
total_phm08_score += phm08_score
# Collect predictions, true values, and uncertainties
all_stochastic_predictions.append(stochastic_predictions)
all_pred_values.append(mean_pred.cpu().numpy())
all_true_values.append(y.cpu().numpy())
all_uncertainties.append(uncertainty_batch)
# Average metrics
total_samples = len(testloader.dataset)
avg_loss_MSE = total_loss_MSE / total_samples
avg_loss_L1 = total_loss_L1 / total_samples
avg_ASUE = total_ASUE / total_samples
    avg_phm08_score = total_phm08_score  # PHM08 score is a sum over all test samples, not an average
# Concatenate all predictions, uncertainties, and true values
all_pred_values = np.concatenate([x.flatten() for x in all_pred_values], axis=0)
all_stochastic_predictions = np.concatenate(all_stochastic_predictions, axis=1)
all_true_values = np.concatenate(all_true_values, axis=0)
all_uncertainties = np.concatenate(all_uncertainties, axis=0) # Ensure consistency
return avg_loss_MSE, avg_loss_L1, avg_ASUE, avg_phm08_score, all_uncertainties, all_stochastic_predictions, all_pred_values, all_true_values
# Perform testing with Monte Carlo Dropout
mse, l1, asue, score, uncertainty, all_y_pred, y_pred, y = test()
# Calculate RMSE and scale by RUL max value
rmse = rul_max * np.sqrt(mse)
# Print results
print(f"Test Results:\n"
f"RMSE: {round(rmse, 2)}\n"
f"Score: {round(score, 2)}\n"
f"L1 Loss: {round(l1, 2)}\n"
f"ASUE: {round(asue, 2)}")
# Mean uncertainty
mean_uncertainty = np.mean(uncertainty)
print(f"Mean Uncertainty: {round(mean_uncertainty, 4)}")
# Optional: Visualize predictions, true values, and uncertainties
import matplotlib.pyplot as plt
plt.figure(figsize=(14, 8))
# True vs predicted RUL
plt.plot(range(len(y)), y, label="True Values", c="lightseagreen", marker=".", alpha=0.7)
plt.plot(range(len(y_pred)), y_pred, label="Predicted Values", c="salmon", marker=".", alpha=0.7)
# Uncertainty as error bars
plt.fill_between(
range(len(y_pred)),
y_pred - uncertainty,
y_pred + uncertainty,
color="salmon", alpha=0.2, label="Uncertainty"
)
# Add labels and legend
plt.xlabel("Sample Index", fontsize=14)
plt.ylabel("RUL", fontsize=14)
plt.title("True vs Predicted RUL with Uncertainty", fontsize=16)
plt.legend(fontsize=12)
plt.grid(True)
plt.savefig(f'figures\\Responses_with_Uncertainty_for_dataset_{file_name}.png', dpi=300, bbox_inches='tight')
plt.show()
Test Results:
RMSE: 12.02
Score: 643.05
L1 Loss: 0.07
ASUE: 0.04
Mean Uncertainty: 0.06729999929666519
values = np.array([y, y_pred, uncertainty]).T
df_values = pd.DataFrame(values)
values = df_values.sort_values(0, ascending=False).to_numpy()
y, y_pred, uncertainty = np.array(values).T
# y, y_pred, uncertainty = MAX_RUL * np.array(values).T
# uncertainty = MAX_RUL * uncertainty
plt.hist(uncertainty, bins=50, color="blue", alpha=0.7, label="Uncertainty Distribution")
plt.xlabel("Uncertainty")
plt.ylabel("Frequency")
plt.title("Histogram of Prediction Uncertainty")
plt.legend()
plt.grid(True)
plt.show()
# confidence = 0.99 # Confidence level
# zeta = 2.576
# # Calculate confidence interval
# standard_error = uncertainty / np.sqrt(num_samples) # Standard error of the mean
# margin_of_error = zeta * standard_error # Margin of error
confidence = 0.95  # Confidence level
margin_of_error = 2 * uncertainty  # ~2 standard deviations ≈ 95% interval under a Gaussian assumption
plt.figure(figsize=(14, 8))
# True vs Predicted Values
plt.plot(range(len(y)), y, label="True Values", c="lightseagreen", marker="o", alpha=0.7)
plt.scatter(range(len(y_pred)), y_pred, label="Predicted Values", c="salmon", edgecolor="black", zorder=3)
# Draw vertical lines from predictions to actual values
for i in range(len(y)):
plt.plot([i, i], [y_pred[i], y[i]], c="gray", linestyle="--", alpha=0.6) # Line from y_pred to y
# plt.scatter(i, y_pred[i], c="salmon", edgecolor="black", zorder=3) # Circle on true value
# Add confidence interval as a shaded region
plt.fill_between(
range(len(y_pred)),
y_pred - margin_of_error,
y_pred + margin_of_error,
color="salmon",
alpha=0.2,
label=f"{int(confidence * 100)}% Confidence Interval"
)
# plt.errorbar(
# range(len(y_pred)), y_pred, yerr=uncertainty, fmt="o",
# ecolor="salmon", elinewidth=1.5, alpha=0.6, capsize=4, label="Uncertainty (95% CI)"
# )
# Add labels, legend, and grid
plt.xlabel("Sample Index", fontsize=14)
plt.ylabel("RUL", fontsize=14)
plt.title("True vs Predicted RUL with Confidence Interval", fontsize=16)
plt.legend(fontsize=12)
plt.grid(True)
# Save or display the plot
plt.savefig(f'figures\\True_vs_PredictedRUL_with_Confidence_Interval_{file_name}.png', dpi=300, bbox_inches="tight")
plt.show()
# Calculate per-sample absolute errors of the mean predictions
errors = np.abs(y_pred - y)
# Scatter plot with enhancements
plt.figure(figsize=(8, 6))
sc = plt.scatter(uncertainty, errors, c=errors, cmap='viridis', alpha=0.7, edgecolor='k')
# Add colorbar
cbar = plt.colorbar(sc)
cbar.set_label('Error Magnitude')
# Add grid and labels
plt.grid(alpha=0.3)
plt.xlabel('Uncertainty', fontsize=12)
plt.ylabel('Error', fontsize=12)
plt.title('Uncertainty vs. Error', fontsize=14)
# Optional: Fit a trend line
z = np.polyfit(uncertainty, errors, 1)
p = np.poly1d(z)
plt.plot(uncertainty, p(uncertainty), color='red', linestyle='--', label='Trend Line')
# Add legend
plt.legend()
# Show the plot
plt.tight_layout()
plt.savefig(f'figures\\UncertaintyVSError_{file_name}.png', dpi=300, bbox_inches='tight')
plt.show()
# Compute statistics
correlation = np.corrcoef(uncertainty, errors)[0, 1]
print(f"Correlation between uncertainty and error: {correlation:.2f}")
# Evaluate performance in high/low uncertainty regions
threshold = np.percentile(uncertainty, 75) # Upper 25% uncertainty
high_uncertainty_mask = uncertainty > threshold
low_uncertainty_mask = uncertainty <= threshold
high_uncertainty_error = errors[high_uncertainty_mask].mean()
low_uncertainty_error = errors[low_uncertainty_mask].mean()
print(f"Mean error in high uncertainty regions: {high_uncertainty_error:.2f}")
print(f"Mean error in low uncertainty regions: {low_uncertainty_error:.2f}")
Correlation between uncertainty and error: 0.78
Mean error in high uncertainty regions: 0.47
Mean error in low uncertainty regions: 0.25
4. Prediction on Test Set Without Dropout at Inference
model.load_state_dict(torch.load(f'saved_models\\DEM_{file_name}', map_location=torch.device(device), weights_only=True))
def calculate_phm08_score(true_rul, predicted_rul, alpha=13, beta=10):
"""
Calculate the PHM08 score metric for RUL prediction.
Parameters:
true_rul (array-like): Array of true RUL values.
predicted_rul (array-like): Array of predicted RUL values.
alpha (float): Scaling factor for early predictions (default=13).
beta (float): Scaling factor for late predictions (default=10).
Returns:
float: Total PHM08 score.
"""
true_rul = np.array(true_rul)
predicted_rul = np.array(predicted_rul)
if true_rul.shape != predicted_rul.shape:
raise ValueError("Shape mismatch between true and predicted RUL arrays")
d = (predicted_rul - true_rul) * MAX_RUL
score = np.where(d < 0, np.exp(-d / alpha) - 1, np.exp(d / beta) - 1)
return np.sum(score)
def test_without_dropout():
model.eval() # Disable stochastic behavior (dropout during inference)
total_loss_MSE = 0.0
total_loss_L1 = 0.0
total_ASUE = 0.0
total_phm08_score = 0.0
all_pred_values = []
all_true_values = []
with torch.no_grad():
for X, y in testloader:
X, y = X.to(device).float(), y.to(device).float()
# Perform a single deterministic forward pass
y_pred = model(X).squeeze()
# Compute batch losses
loss_MSE = torch.mean((y_pred - y) ** 2).item()
loss_L1_val = loss_L1(y_pred, y).item()
ASUE = torch.mean(torch.relu(y - y_pred)).item()
# Compute PHM08 score
phm08_score = calculate_phm08_score(y.cpu().numpy(), y_pred.cpu().numpy())
# Aggregate results
total_loss_MSE += loss_MSE * X.size(0)
total_loss_L1 += loss_L1_val * X.size(0)
total_ASUE += ASUE * X.size(0)
total_phm08_score += phm08_score
# Collect predictions and true values
all_pred_values.append(y_pred.cpu().numpy())
all_true_values.append(y.cpu().numpy())
# Average metrics
total_samples = len(testloader.dataset)
avg_loss_MSE = total_loss_MSE / total_samples
avg_loss_L1 = total_loss_L1 / total_samples
avg_ASUE = total_ASUE / total_samples
avg_phm08_score = total_phm08_score
# Concatenate all predictions and true values
all_pred_values = np.concatenate([x.flatten() for x in all_pred_values], axis=0)
all_true_values = np.concatenate(all_true_values, axis=0)
return avg_loss_MSE, avg_loss_L1, avg_ASUE, avg_phm08_score, all_pred_values, all_true_values
# Perform deterministic testing without dropout at inference
mse, l1, asue, score, y_pred, y = test_without_dropout()
# Calculate RMSE and scale by RUL max value
rmse = rul_max * np.sqrt(mse)
# Print results
print(f"Test Results:\n"
f"RMSE: {rmse:.2f}\n"
f"Score: {score:.2f}\n"
f"L1 Loss: {l1:.2f}\n"
f"ASUE: {asue:.3f}")
Test Results:
RMSE: 12.03
Score: 647.43
L1 Loss: 0.07
ASUE: 0.037
5. Methods for Weighting Predictions by Uncertainty
# Perform testing with Monte Carlo Dropout
mse, l1, asue, score, uncertainty, all_y_pred, y_pred, y = test()
loss_MSE = np.mean((y_pred - y) ** 2).item()
rmse = rul_max * np.sqrt(mse)
print(f'RMSE:{rmse:.2f}')
RMSE:11.99
# Weight each stochastic draw by its deviation from the per-sample mean prediction
all_y_pred_mean = all_y_pred.mean(axis=0)
weights = abs(all_y_pred - all_y_pred_mean)
weights = np.exp(-weights)  # larger deviation -> smaller weight
# weights = 1 / weights
weights /= weights.sum(axis=0)  # normalize the weights for each test sample
weighted_predictions = np.sum(weights * all_y_pred, axis=0)
mse = np.mean((weighted_predictions - y) ** 2).item()
rmse = rul_max * np.sqrt(mse)
print(f'RMSE:{rmse:.2f}')
RMSE:11.99
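The commented-out weights = 1 / weights line above hints at an alternative, inverse-deviation weighting of the stochastic draws; a minimal sketch (assuming all_y_pred still has shape (num_samples, n_test), not part of the results reported above):
# Sketch of inverse-deviation weighting of the stochastic draws
eps = 1e-8  # avoid division by zero for draws equal to the mean
inv_weights = 1.0 / (np.abs(all_y_pred - all_y_pred.mean(axis=0)) + eps)
inv_weights /= inv_weights.sum(axis=0)  # normalize per test sample
inv_weighted_pred = np.sum(inv_weights * all_y_pred, axis=0)
print(f'RMSE:{rul_max * np.sqrt(np.mean((inv_weighted_pred - y) ** 2)):.2f}')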
Thresholding by Uncertainty
# Define a threshold on the raw deviations from the mean prediction (e.g., 1.5x the mean deviation);
# recompute them here since `weights` was overwritten by the normalized weights above
deviations = np.abs(all_y_pred - all_y_pred_mean)
threshold = 1.5 * deviations.mean(axis=0)
# Mask predictions whose deviation exceeds the threshold
valid_predictions = np.where(deviations <= threshold, all_y_pred, np.nan)
# Compute predictions as the mean of valid predictions
weighted_predictions = np.nanmean(valid_predictions, axis=0)
mse = np.mean((weighted_predictions - y) ** 2).item()
rmse = rul_max * np.sqrt(mse)
print(f'RMSE:{rmse:.2f}')
RMSE:11.99